__notebook_source__.ipynb
No Headings
The table of contents shows headings in notebooks and supported files.
- File
- Edit
- View
- Run
- Kernel
- Settings
- Help
Kernel Unknown
[60]:
!pip install lime
Requirement already satisfied: lime in /usr/local/lib/python3.11/dist-packages (0.2.0.1) Requirement already satisfied: matplotlib in /usr/local/lib/python3.11/dist-packages (from lime) (3.7.2) Requirement already satisfied: numpy in /usr/local/lib/python3.11/dist-packages (from lime) (1.26.4) Requirement already satisfied: scipy in /usr/local/lib/python3.11/dist-packages (from lime) (1.15.3) Requirement already satisfied: tqdm in /usr/local/lib/python3.11/dist-packages (from lime) (4.67.1) Requirement already satisfied: scikit-learn>=0.18 in /usr/local/lib/python3.11/dist-packages (from lime) (1.2.2) Requirement already satisfied: scikit-image>=0.12 in /usr/local/lib/python3.11/dist-packages (from lime) (0.25.2) Requirement already satisfied: networkx>=3.0 in /usr/local/lib/python3.11/dist-packages (from scikit-image>=0.12->lime) (3.5) Requirement already satisfied: pillow>=10.1 in /usr/local/lib/python3.11/dist-packages (from scikit-image>=0.12->lime) (11.2.1) Requirement already satisfied: imageio!=2.35.0,>=2.33 in /usr/local/lib/python3.11/dist-packages (from scikit-image>=0.12->lime) (2.37.0) Requirement already satisfied: tifffile>=2022.8.12 in /usr/local/lib/python3.11/dist-packages (from scikit-image>=0.12->lime) (2025.6.11) Requirement already satisfied: packaging>=21 in /usr/local/lib/python3.11/dist-packages (from scikit-image>=0.12->lime) (25.0) Requirement already satisfied: lazy-loader>=0.4 in /usr/local/lib/python3.11/dist-packages (from scikit-image>=0.12->lime) (0.4) Requirement already satisfied: mkl_fft in /usr/local/lib/python3.11/dist-packages (from numpy->lime) (1.3.8) Requirement already satisfied: mkl_random in /usr/local/lib/python3.11/dist-packages (from numpy->lime) (1.2.4) Requirement already satisfied: mkl_umath in /usr/local/lib/python3.11/dist-packages (from numpy->lime) (0.1.1) Requirement already satisfied: mkl in /usr/local/lib/python3.11/dist-packages (from numpy->lime) (2025.2.0) Requirement already satisfied: tbb4py in 
/usr/local/lib/python3.11/dist-packages (from numpy->lime) (2022.2.0) Requirement already satisfied: mkl-service in /usr/local/lib/python3.11/dist-packages (from numpy->lime) (2.4.1) Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.11/dist-packages (from scikit-learn>=0.18->lime) (1.5.1) Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.11/dist-packages (from scikit-learn>=0.18->lime) (3.6.0) Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib->lime) (1.3.2) Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.11/dist-packages (from matplotlib->lime) (0.12.1) Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.11/dist-packages (from matplotlib->lime) (4.58.4) Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib->lime) (1.4.8) Requirement already satisfied: pyparsing<3.1,>=2.3.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib->lime) (3.0.9) Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.11/dist-packages (from matplotlib->lime) (2.9.0.post0) Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.11/dist-packages (from python-dateutil>=2.7->matplotlib->lime) (1.17.0) Requirement already satisfied: intel-openmp<2026,>=2024 in /usr/local/lib/python3.11/dist-packages (from mkl->numpy->lime) (2024.2.0) Requirement already satisfied: tbb==2022.* in /usr/local/lib/python3.11/dist-packages (from mkl->numpy->lime) (2022.2.0) Requirement already satisfied: tcmlib==1.* in /usr/local/lib/python3.11/dist-packages (from tbb==2022.*->mkl->numpy->lime) (1.4.0) Requirement already satisfied: intel-cmplr-lib-rt in /usr/local/lib/python3.11/dist-packages (from mkl_umath->numpy->lime) (2024.2.0) Requirement already satisfied: intel-cmplr-lib-ur==2024.2.0 in /usr/local/lib/python3.11/dist-packages (from 
intel-openmp<2026,>=2024->mkl->numpy->lime) (2024.2.0)
[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import warnings
warnings.filterwarnings('ignore')
[2]:
# Load the dataset metadata: one row per face-cropped video with its label
# (FAKE/REAL) and, for fakes, the source video it was generated from.
df=pd.read_csv("/kaggle/input/deepfake-faces/metadata.csv")
df.head()
[2]:
| videoname | original_width | original_height | label | original | |
|---|---|---|---|---|---|
| 0 | aznyksihgl.mp4 | 129 | 129 | FAKE | xnojggkrxt.mp4 |
| 1 | gkwmalrvcj.mp4 | 129 | 129 | FAKE | hqqmtxvbjj.mp4 |
| 2 | lxnqzocgaq.mp4 | 223 | 217 | FAKE | xjzkfqddyk.mp4 |
| 3 | itsbtrrelv.mp4 | 186 | 186 | FAKE | kqvepwqxfe.mp4 |
| 4 | ddvgrczjno.mp4 | 155 | 155 | FAKE | pluadmqqta.mp4 |
[3]:
def classify_features(df):
    """Partition a DataFrame's columns into four feature groups.

    Object-dtype columns are split on cardinality into categorical
    (< 10 unique values) vs. non-categorical; int64/float64 columns are
    split the same way into discrete vs. continuous. Other dtypes are
    ignored.

    Parameters
    ----------
    df : pd.DataFrame

    Returns
    -------
    tuple of four lists of column names:
    (categorical, non_categorical, discrete, continuous)
    """
    catagorical_feature = []
    non_catagorical_feature = []
    discret_feature = []
    continous_feature = []
    for column in df.columns:
        if df[column].dtype == "object":
            if df[column].nunique() < 10:
                catagorical_feature.append(column)
            else:
                non_catagorical_feature.append(column)
        elif df[column].dtype in ["int64", "float64"]:
            if df[column].nunique() < 10:
                # Fixed: was `discret_features.append(...)` — a NameError typo
                # that only never fired because this dataset has no numeric
                # column with fewer than 10 unique values.
                discret_feature.append(column)
            else:
                continous_feature.append(column)
    return catagorical_feature, non_catagorical_feature, discret_feature, continous_feature
[4]:
# Group every column and show the resulting feature lists.
catagorical,non_catagorical,discret,continous=classify_features(df)
print("catagorical: ",catagorical)
print("non_catagorical: ",non_catagorical)
print("discret: ",discret)
print("continous: ",continous)
catagorical: ['label'] non_catagorical: ['videoname', 'original'] discret: [] continous: ['original_width', 'original_height']
[5]:
# Missing values per column — only 'original' is NaN (for REAL videos,
# which have no source video).
df.isna().sum()
[5]:
videoname 0 original_width 0 original_height 0 label 0 original 16293 dtype: int64
[6]:
# Make the missing 'original' explicit instead of NaN.
df=df.fillna("Not Available")
[7]:
# Unique values, then frequency counts, for every categorical column.
for col in catagorical:
    print(col, ':', df[col].unique())
    print()
for col in catagorical:
    print(df[col].value_counts())
    print()
label : ['FAKE' 'REAL'] label FAKE 79341 REAL 16293 Name: count, dtype: int64
[8]:
# Visualise the class balance with one count plot per categorical column.
for col in catagorical:
    plt.figure(figsize=(15, 6))
    sns.countplot(x=df[col], data=df, palette='hls')
    plt.show()
## Data Balancing
[9]:
# The classes are heavily imbalanced (~79k FAKE vs ~16k REAL); down-sample
# both to the same size so the model trains on a 50/50 pool.
sample_size = 10000
real_df = df.query('label == "REAL"').sample(sample_size, random_state=42)
fake_df = df.query('label == "FAKE"').sample(sample_size, random_state=42)
sample_meta = pd.concat([real_df, fake_df])
[10]:
from sklearn.model_selection import train_test_split
# 80/20 train/test split, then 70/30 train/val split of the training part,
# both stratified on the label to preserve the 50/50 class balance.
Train_set, Test_set = train_test_split(sample_meta,test_size=0.2,random_state=42,stratify=sample_meta['label'])
Train_set, Val_set = train_test_split(Train_set,test_size=0.3,random_state=42,stratify=Train_set['label'])
Train_set.shape,Val_set.shape,Test_set.shape
[10]:
((11200, 5), (4800, 5), (4000, 5))
[11]:
import cv2

# Preview the first nine face crops in a 3x3 grid.
image_path = "/kaggle/input/deepfake-faces/faces_224/"
# Fixed: the original reused the name `image_file` both for the directory
# listing and the loop variable, shadowing the list inside the loop.
image_files = sorted(os.listdir(image_path))  # sort for a deterministic selection
selected_images = image_files[:9]
plt.figure(figsize=(10, 10))
for index, fname in enumerate(selected_images):
    image = cv2.imread(os.path.join(image_path, fname))
    plt.subplot(3, 3, index + 1)
    # OpenCV decodes BGR; matplotlib expects RGB.
    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.title(f'Image {index + 1}')
    plt.axis('off')
plt.show()
[12]:
def retreive_dataset(set_name, image_dir='../input/deepfake-faces/faces_224/'):
    """Load face images and binary labels for one metadata split.

    Parameters
    ----------
    set_name : pd.DataFrame
        Split with 'videoname' and 'label' columns.
    image_dir : str, optional
        Directory of the pre-extracted face crops; each image filename is
        the video basename with a '.jpg' extension. Defaults to the
        original hard-coded Kaggle path, so existing calls are unchanged.

    Returns
    -------
    (images, labels) : tuple of np.ndarray
        labels are 1 for FAKE, 0 for REAL.
    """
    images, labels = [], []
    for img, imclass in zip(set_name['videoname'], set_name['label']):
        # NOTE(review): cv2.imread returns None for a missing file; a None
        # here silently turns the stacked result into an object array —
        # confirm every listed face exists on disk.
        images.append(cv2.imread(image_dir + img[:-4] + '.jpg'))
        labels.append(1 if imclass == 'FAKE' else 0)
    return np.array(images), np.array(labels)
[13]:
# Materialise images + labels for each split.
# NOTE(review): this holds ~20k uncompressed 224x224x3 images in RAM at once.
X_train,y_train=retreive_dataset(Train_set)
X_val,y_val=retreive_dataset(Val_set)
X_test,y_test=retreive_dataset(Test_set)
[14]:
import tensorflow as tf
from tensorflow.keras import layers, models
from functools import partial
# Seed TF's global RNG so weight initialisation is reproducible.
tf.random.set_seed(42)
2025-09-23 05:46:53.513236: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:477] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered WARNING: All log messages before absl::InitializeLog() is called are written to STDERR E0000 00:00:1758606413.687765 36 cuda_dnn.cc:8310] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered E0000 00:00:1758606413.739889 36 cuda_blas.cc:1418] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
[15]:
# Training configuration constants.
IMAGE_SIZE=224   # face crops are 224x224 RGB
BATCH_SIZE=32
EPOCHS=10
[16]:
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau

def get_callbacks(name):
    """Build the standard training callbacks for one model run.

    Parameters
    ----------
    name : str
        Tag used in the checkpoint filename (e.g. 'custom', 'resnet50').

    Returns
    -------
    list of keras callbacks: [ModelCheckpoint, EarlyStopping, ReduceLROnPlateau]
    """
    # Save the best model based on validation loss.
    checkpoint_cb = ModelCheckpoint(
        filepath=f'best_model_{name}.h5',  # use '.keras' on newer TF versions
        monitor='val_loss',
        save_best_only=True,
        save_weights_only=False,
        verbose=1
    )
    # Stop training early if validation loss doesn't improve.
    earlystop_cb = EarlyStopping(
        monitor='val_loss',
        patience=3,  # number of epochs with no improvement
        restore_best_weights=True,
        verbose=1
    )
    # Reduce learning rate when validation loss plateaus.
    # Fixed: patience was 5, but EarlyStopping stops after 3 stale epochs,
    # so the LR reduction could never fire. It must be < the stopping patience.
    # NOTE(review): ReduceLROnPlateau only works with a float learning rate —
    # it is a no-op/error if the optimizer uses a LearningRateSchedule.
    reduce_lr_cb = ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.5,
        patience=2,
        min_lr=1e-6,
        verbose=1
    )
    # Combine all callbacks.
    return [checkpoint_cb, earlystop_cb, reduce_lr_cb]
[17]:
# Convolution template: 3x3 "same" convs with ReLU and He initialisation.
DefaultConv2D = partial(layers.Conv2D, kernel_size=3, padding="same",
                        activation="relu", kernel_initializer="he_normal")

# Model definition — same architecture, assembled layer-by-layer.
model = models.Sequential()
model.add(DefaultConv2D(filters=64, kernel_size=7,
                        input_shape=[IMAGE_SIZE, IMAGE_SIZE, 3]))
model.add(layers.MaxPooling2D())
model.add(layers.BatchNormalization())
model.add(DefaultConv2D(filters=128))
model.add(DefaultConv2D(filters=128))
model.add(layers.MaxPooling2D())
model.add(layers.BatchNormalization())
model.add(layers.Flatten())
model.add(layers.Dense(units=128, activation="relu",
                       kernel_initializer="he_normal"))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(units=64, activation="relu",
                       kernel_initializer="he_normal"))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.5))
# Single sigmoid unit: binary FAKE/REAL output.
model.add(layers.Dense(units=1, activation="sigmoid"))
I0000 00:00:1758606425.304481 36 gpu_device.cc:2022] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 13942 MB memory: -> device: 0, name: Tesla T4, pci bus id: 0000:00:04.0, compute capability: 7.5 I0000 00:00:1758606425.305187 36 gpu_device.cc:2022] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 13942 MB memory: -> device: 1, name: Tesla T4, pci bus id: 0000:00:05.0, compute capability: 7.5
[18]:
# Exponentially decay the LR by 4% every 100k steps (staircase).
# NOTE(review): at ~350 steps/epoch x 10 epochs the decay never triggers,
# so this is effectively a constant 0.001.
initial_learning_rate = 0.001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)
[19]:
# Binary head -> binary cross-entropy; track recall and precision alongside
# accuracy (the sampled classes are balanced, so accuracy is meaningful).
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
    loss="binary_crossentropy",
    metrics=[
        "accuracy",
        tf.keras.metrics.Recall(name='recall'),
        tf.keras.metrics.Precision(name='precision')
    ]
)
[20]:
# Inspect layer shapes and parameter counts.
model.summary()
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ │ conv2d (Conv2D) │ (None, 224, 224, 64) │ 9,472 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d (MaxPooling2D) │ (None, 112, 112, 64) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ batch_normalization │ (None, 112, 112, 64) │ 256 │ │ (BatchNormalization) │ │ │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ conv2d_1 (Conv2D) │ (None, 112, 112, 128) │ 73,856 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ conv2d_2 (Conv2D) │ (None, 112, 112, 128) │ 147,584 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ max_pooling2d_1 (MaxPooling2D) │ (None, 56, 56, 128) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ batch_normalization_1 │ (None, 56, 56, 128) │ 512 │ │ (BatchNormalization) │ │ │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ flatten (Flatten) │ (None, 401408) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense (Dense) │ (None, 128) │ 51,380,352 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ batch_normalization_2 │ (None, 128) │ 512 │ │ (BatchNormalization) │ │ │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dropout (Dropout) │ (None, 128) │ 0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_1 (Dense) │ (None, 64) │ 8,256 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ batch_normalization_3 │ (None, 64) │ 256 │ │ (BatchNormalization) │ │ │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dropout_1 (Dropout) │ (None, 64) │ 
0 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_2 (Dense) │ (None, 1) │ 65 │ └─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 51,621,121 (196.92 MB)
Trainable params: 51,620,353 (196.92 MB)
Non-trainable params: 768 (3.00 KB)
[21]:
# Train the custom CNN with checkpointing / early stopping / LR reduction.
history = model.fit(
    X_train,
    y_train,
    epochs=EPOCHS,  # was a hard-coded 10; use the config constant (same value)
    callbacks=get_callbacks('custom'),
    batch_size=BATCH_SIZE,
    validation_data=(X_val,y_val),
    verbose=1
)
Epoch 1/10
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1758606435.790639 101 service.cc:148] XLA service 0x7e9ab4006430 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices: I0000 00:00:1758606435.791181 101 service.cc:156] StreamExecutor device (0): Tesla T4, Compute Capability 7.5 I0000 00:00:1758606435.791225 101 service.cc:156] StreamExecutor device (1): Tesla T4, Compute Capability 7.5 I0000 00:00:1758606436.312170 101 cuda_dnn.cc:529] Loaded cuDNN version 90300 I0000 00:00:1758606450.879275 101 device_compiler.h:188] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 143ms/step - accuracy: 0.5246 - loss: 0.9344 - precision: 0.5306 - recall: 0.5318 Epoch 1: val_loss improved from inf to 0.79419, saving model to best_model_custom.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 85s 184ms/step - accuracy: 0.5247 - loss: 0.9341 - precision: 0.5306 - recall: 0.5319 - val_accuracy: 0.5221 - val_loss: 0.7942 - val_precision: 0.5798 - val_recall: 0.1604 - learning_rate: 0.0010 Epoch 2/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 149ms/step - accuracy: 0.5587 - loss: 0.7276 - precision: 0.5658 - recall: 0.5567 Epoch 2: val_loss improved from 0.79419 to 0.65845, saving model to best_model_custom.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 61s 175ms/step - accuracy: 0.5587 - loss: 0.7276 - precision: 0.5658 - recall: 0.5567 - val_accuracy: 0.5979 - val_loss: 0.6584 - val_precision: 0.5641 - val_recall: 0.8612 - learning_rate: 0.0010 Epoch 3/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 150ms/step - accuracy: 0.6110 - loss: 0.6655 - precision: 0.6107 - recall: 0.6406 Epoch 3: val_loss did not improve from 0.65845 350/350 ━━━━━━━━━━━━━━━━━━━━ 59s 169ms/step - accuracy: 0.6110 - loss: 0.6655 - precision: 0.6107 - recall: 0.6405 - val_accuracy: 0.5073 - val_loss: 1.3706 - val_precision: 0.6446 - val_recall: 0.0325 - learning_rate: 0.0010 Epoch 4/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 150ms/step - accuracy: 0.6426 - loss: 0.6314 - precision: 0.6413 - recall: 0.6692 Epoch 4: val_loss did not improve from 0.65845 350/350 ━━━━━━━━━━━━━━━━━━━━ 59s 169ms/step - accuracy: 0.6426 - loss: 0.6314 - precision: 0.6413 - recall: 0.6692 - val_accuracy: 0.5367 - val_loss: 0.8703 - val_precision: 0.8359 - val_recall: 0.0913 - learning_rate: 0.0010 Epoch 5/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 150ms/step - accuracy: 0.6529 - loss: 0.6132 - precision: 0.6566 - recall: 0.6607 Epoch 5: val_loss improved from 0.65845 to 0.61754, saving model to best_model_custom.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 61s 175ms/step - accuracy: 0.6529 - loss: 0.6132 - precision: 0.6566 - recall: 0.6608 - 
val_accuracy: 0.6510 - val_loss: 0.6175 - val_precision: 0.6891 - val_recall: 0.5504 - learning_rate: 0.0010 Epoch 6/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 150ms/step - accuracy: 0.6884 - loss: 0.5886 - precision: 0.6831 - recall: 0.7190 Epoch 6: val_loss improved from 0.61754 to 0.61076, saving model to best_model_custom.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 61s 175ms/step - accuracy: 0.6884 - loss: 0.5885 - precision: 0.6831 - recall: 0.7189 - val_accuracy: 0.6615 - val_loss: 0.6108 - val_precision: 0.7084 - val_recall: 0.5487 - learning_rate: 0.0010 Epoch 7/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 150ms/step - accuracy: 0.7158 - loss: 0.5415 - precision: 0.7098 - recall: 0.7424 Epoch 7: val_loss did not improve from 0.61076 350/350 ━━━━━━━━━━━━━━━━━━━━ 59s 168ms/step - accuracy: 0.7158 - loss: 0.5415 - precision: 0.7098 - recall: 0.7424 - val_accuracy: 0.6233 - val_loss: 0.6756 - val_precision: 0.7236 - val_recall: 0.3992 - learning_rate: 0.0010 Epoch 8/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 149ms/step - accuracy: 0.7532 - loss: 0.4999 - precision: 0.7456 - recall: 0.7787 Epoch 8: val_loss did not improve from 0.61076 350/350 ━━━━━━━━━━━━━━━━━━━━ 59s 168ms/step - accuracy: 0.7532 - loss: 0.4999 - precision: 0.7456 - recall: 0.7787 - val_accuracy: 0.6404 - val_loss: 0.6344 - val_precision: 0.6123 - val_recall: 0.7654 - learning_rate: 0.0010 Epoch 9/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 149ms/step - accuracy: 0.7850 - loss: 0.4459 - precision: 0.7783 - recall: 0.8050 Epoch 9: val_loss did not improve from 0.61076 350/350 ━━━━━━━━━━━━━━━━━━━━ 59s 167ms/step - accuracy: 0.7850 - loss: 0.4458 - precision: 0.7783 - recall: 0.8051 - val_accuracy: 0.5454 - val_loss: 1.8196 - val_precision: 0.7535 - val_recall: 0.1350 - learning_rate: 0.0010 Epoch 9: early stopping Restoring model weights from the end of the best epoch: 6.
[22]:
# Sigmoid scores in [0, 1] for the held-out test set.
y_pred = model.predict(X_test)
125/125 ━━━━━━━━━━━━━━━━━━━━ 6s 43ms/step
[23]:
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix, classification_report, roc_auc_score
[24]:
# Threshold the scores at 0.5 to get hard class predictions (1 = FAKE).
y_test_pred_binary = (y_pred > 0.5).astype(int)
[25]:
# Same for the training set, to gauge over/under-fitting.
y_train_pred = model.predict(X_train)
y_train_pred_binary = (y_train_pred > 0.5).astype(int)
350/350 ━━━━━━━━━━━━━━━━━━━━ 15s 43ms/step
[26]:
# NOTE(review): this import duplicates the one a few cells above; harmless
# but redundant.
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix, classification_report, roc_auc_score
train_accuracy = accuracy_score(y_train, y_train_pred_binary)
print(f"Training Accuracy: {train_accuracy * 100:.2f}%")
Training Accuracy: 72.52%
[27]:
# Generalisation check on the held-out split.
test_accuracy = accuracy_score(y_test, y_test_pred_binary)
print(f"Test Accuracy: {test_accuracy * 100:.2f}%")
Test Accuracy: 66.40%
[28]:
# Threshold-dependent metrics use the hard 0/1 predictions.
f1 = f1_score(y_test, y_test_pred_binary)
print(f"F1 Score: {f1:.4f}")
precision = precision_score(y_test, y_test_pred_binary)
print(f"Precision: {precision:.4f}")  # fixed label typo: was "Precison"
recall = recall_score(y_test, y_test_pred_binary)
print(f"Recall: {recall:.4f}")
# Calculate AUC-ROC.
# Fixed: AUC must be computed from the continuous sigmoid scores (y_pred),
# not the thresholded labels — using binary predictions collapses the ROC
# curve to a single point and just reproduces balanced accuracy.
auc_roc = roc_auc_score(y_test, y_pred)
print(f"AUC-ROC: {auc_roc:.4f}")
F1 Score: 0.6201 Precison: 0.7133 Recall: 0.5485 AUC-ROC: 0.6640
[29]:
# Rows = true class (0=REAL, 1=FAKE), columns = predicted class.
conf_matrix = confusion_matrix(y_test, y_test_pred_binary)
print("Confusion Matrix:")
print(conf_matrix)
Confusion Matrix: [[1559 441] [ 903 1097]]
[30]:
# NOTE(review): seaborn is already imported at the top of the notebook.
import seaborn as sns
# Heatmap of the confusion matrix for the custom CNN.
sns.heatmap(conf_matrix, annot=True)
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.title('Confusion Matrix of custom model')
plt.tight_layout()
plt.show()
[31]:
# Per-class precision/recall/F1 summary for the custom CNN.
class_report = classification_report(y_test, y_test_pred_binary)
print("Classification Report:")
print(class_report)
Classification Report:
precision recall f1-score support
0 0.63 0.78 0.70 2000
1 0.71 0.55 0.62 2000
accuracy 0.66 4000
macro avg 0.67 0.66 0.66 4000
weighted avg 0.67 0.66 0.66 4000
[32]:
import matplotlib.pyplot as plt

# Unpack training history
hist = history.history

# 2x2 grid, one panel per tracked metric (train vs. validation curves).
# Refactored: the four subplot sections were copy-pasted; a loop produces
# the identical figure with the identical labels and titles.
metrics = ["accuracy", "loss", "precision", "recall"]
plt.figure(figsize=(14, 10))
for idx, metric in enumerate(metrics, start=1):
    plt.subplot(2, 2, idx)
    label = metric.capitalize()  # "accuracy" -> "Accuracy", etc.
    plt.plot(hist[metric], label=f"Train {label}")
    plt.plot(hist[f"val_{metric}"], label=f"Val {label}")
    plt.title(label)
    plt.xlabel("Epoch")
    plt.ylabel(label)
    plt.legend()
plt.tight_layout()
plt.show()
[33]:
# Persist the trained custom CNN in the native Keras format.
model.save("custom_model_ep10.keras")
[34]:
from tensorflow.keras.applications import ResNet50

# ImageNet-pretrained ResNet50 backbone, convolutional part only.
base_model = ResNet50(weights='imagenet', include_top=False,
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

# Freeze every backbone layer: only the new classification head trains.
for layer in base_model.layers:
    layer.trainable = False

# Transfer-learning model: frozen backbone + GAP + sigmoid head.
model_resnet50 = models.Sequential([
    base_model,
    layers.GlobalAveragePooling2D(),
    layers.Dense(1, activation='sigmoid'),
])
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5 94765736/94765736 ━━━━━━━━━━━━━━━━━━━━ 1s 0us/step
[35]:
# Only the 2,049 head parameters are trainable; the backbone is frozen.
model_resnet50.summary()
Model: "sequential_1"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓ ┃ Layer (type) ┃ Output Shape ┃ Param # ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩ │ resnet50 (Functional) │ (None, 7, 7, 2048) │ 23,587,712 │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ global_average_pooling2d │ (None, 2048) │ 0 │ │ (GlobalAveragePooling2D) │ │ │ ├─────────────────────────────────┼────────────────────────┼───────────────┤ │ dense_3 (Dense) │ (None, 1) │ 2,049 │ └─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 23,589,761 (89.99 MB)
Trainable params: 2,049 (8.00 KB)
Non-trainable params: 23,587,712 (89.98 MB)
[36]:
# NOTE(review): `optimizers` is imported but unused — the string "adam"
# below selects Adam with its default learning rate.
from tensorflow.keras import optimizers
model_resnet50.compile(
    optimizer="adam",
    loss='binary_crossentropy',
    metrics=[
        "accuracy",
        tf.keras.metrics.Recall(name='recall'),
        tf.keras.metrics.Precision(name='precision')
    ]
)
[37]:
# Train the ResNet50 head. batch_size/epochs are now passed explicitly for
# consistency with the custom model's fit call (32 and 10 match the values
# used before — 32 is also Keras' default — so behaviour is unchanged).
history = model_resnet50.fit(
    X_train, y_train,
    epochs=EPOCHS,
    batch_size=BATCH_SIZE,
    callbacks=get_callbacks('resnet50'),
    validation_data=(X_val, y_val),
    verbose=1
)
Epoch 1/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - accuracy: 0.5700 - loss: 0.6927 - precision: 0.5758 - recall: 0.5795 Epoch 1: val_loss improved from inf to 0.60883, saving model to best_model_resnet50.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 62s 136ms/step - accuracy: 0.5701 - loss: 0.6926 - precision: 0.5759 - recall: 0.5796 - val_accuracy: 0.6702 - val_loss: 0.6088 - val_precision: 0.6588 - val_recall: 0.7063 - learning_rate: 0.0010 Epoch 2/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - accuracy: 0.6648 - loss: 0.6096 - precision: 0.6671 - recall: 0.6752 Epoch 2: val_loss improved from 0.60883 to 0.59250, saving model to best_model_resnet50.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 41s 116ms/step - accuracy: 0.6648 - loss: 0.6096 - precision: 0.6671 - recall: 0.6752 - val_accuracy: 0.6812 - val_loss: 0.5925 - val_precision: 0.6693 - val_recall: 0.7167 - learning_rate: 0.0010 Epoch 3/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - accuracy: 0.6829 - loss: 0.5871 - precision: 0.6847 - recall: 0.6935 Epoch 3: val_loss improved from 0.59250 to 0.58348, saving model to best_model_resnet50.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 41s 116ms/step - accuracy: 0.6829 - loss: 0.5871 - precision: 0.6847 - recall: 0.6935 - val_accuracy: 0.6854 - val_loss: 0.5835 - val_precision: 0.6736 - val_recall: 0.7196 - learning_rate: 0.0010 Epoch 4/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - accuracy: 0.6993 - loss: 0.5720 - precision: 0.7013 - recall: 0.7080 Epoch 4: val_loss improved from 0.58348 to 0.57742, saving model to best_model_resnet50.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 41s 116ms/step - accuracy: 0.6993 - loss: 0.5720 - precision: 0.7013 - recall: 0.7080 - val_accuracy: 0.6915 - val_loss: 0.5774 - val_precision: 0.6803 - val_recall: 0.7225 - learning_rate: 0.0010 Epoch 5/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - accuracy: 0.7122 - loss: 0.5603 - precision: 0.7131 - recall: 0.7225 Epoch 5: val_loss improved from 0.57742 to 0.57307, saving model to best_model_resnet50.h5 350/350 
━━━━━━━━━━━━━━━━━━━━ 41s 116ms/step - accuracy: 0.7122 - loss: 0.5603 - precision: 0.7131 - recall: 0.7225 - val_accuracy: 0.6954 - val_loss: 0.5731 - val_precision: 0.6852 - val_recall: 0.7229 - learning_rate: 0.0010 Epoch 6/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - accuracy: 0.7231 - loss: 0.5507 - precision: 0.7246 - recall: 0.7313 Epoch 6: val_loss improved from 0.57307 to 0.56983, saving model to best_model_resnet50.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 41s 116ms/step - accuracy: 0.7231 - loss: 0.5507 - precision: 0.7246 - recall: 0.7313 - val_accuracy: 0.6967 - val_loss: 0.5698 - val_precision: 0.6867 - val_recall: 0.7233 - learning_rate: 0.0010 Epoch 7/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - accuracy: 0.7291 - loss: 0.5426 - precision: 0.7308 - recall: 0.7366 Epoch 7: val_loss improved from 0.56983 to 0.56737, saving model to best_model_resnet50.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 41s 117ms/step - accuracy: 0.7291 - loss: 0.5426 - precision: 0.7307 - recall: 0.7365 - val_accuracy: 0.7008 - val_loss: 0.5674 - val_precision: 0.6905 - val_recall: 0.7279 - learning_rate: 0.0010 Epoch 8/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - accuracy: 0.7341 - loss: 0.5357 - precision: 0.7358 - recall: 0.7413 Epoch 8: val_loss improved from 0.56737 to 0.56550, saving model to best_model_resnet50.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 41s 117ms/step - accuracy: 0.7341 - loss: 0.5357 - precision: 0.7358 - recall: 0.7412 - val_accuracy: 0.7006 - val_loss: 0.5655 - val_precision: 0.6916 - val_recall: 0.7242 - learning_rate: 0.0010 Epoch 9/10 350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - accuracy: 0.7386 - loss: 0.5296 - precision: 0.7395 - recall: 0.7468 Epoch 9: val_loss improved from 0.56550 to 0.56408, saving model to best_model_resnet50.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 41s 117ms/step - accuracy: 0.7385 - loss: 0.5295 - precision: 0.7395 - recall: 0.7468 - val_accuracy: 0.7000 - val_loss: 0.5641 - val_precision: 0.6920 - val_recall: 0.7208 - learning_rate: 0.0010 Epoch 10/10 
350/350 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - accuracy: 0.7415 - loss: 0.5241 - precision: 0.7416 - recall: 0.7513 Epoch 10: val_loss improved from 0.56408 to 0.56300, saving model to best_model_resnet50.h5 350/350 ━━━━━━━━━━━━━━━━━━━━ 41s 116ms/step - accuracy: 0.7415 - loss: 0.5241 - precision: 0.7416 - recall: 0.7512 - val_accuracy: 0.7015 - val_loss: 0.5630 - val_precision: 0.6939 - val_recall: 0.7208 - learning_rate: 0.0010 Restoring model weights from the end of the best epoch: 10.
[38]:
# ResNet50 sigmoid scores for the test set (overwrites the custom model's y_pred).
y_pred = model_resnet50.predict(X_test)
125/125 ━━━━━━━━━━━━━━━━━━━━ 14s 78ms/step
[39]:
# Hard 0/1 predictions at the 0.5 threshold.
y_test_pred_binary = (y_pred > 0.5).astype(int)
[40]:
# Training-set predictions for the over/under-fitting check.
y_train_pred = model_resnet50.predict(X_train)
y_train_pred_binary = (y_train_pred > 0.5).astype(int)
350/350 ━━━━━━━━━━━━━━━━━━━━ 28s 79ms/step
[41]:
# ResNet50 training accuracy.
train_accuracy = accuracy_score(y_train, y_train_pred_binary)
print(f"Training Accuracy: {train_accuracy * 100:.2f}%")
Training Accuracy: 75.61%
[42]:
# ResNet50 held-out accuracy.
test_accuracy = accuracy_score(y_test, y_test_pred_binary)
print(f"Test Accuracy: {test_accuracy * 100:.2f}%")
Test Accuracy: 71.15%
[43]:
# Threshold-dependent metrics for the ResNet50 model.
f1 = f1_score(y_test, y_test_pred_binary)
print(f"F1 Score: {f1:.4f}")
precision = precision_score(y_test, y_test_pred_binary)
print(f"Precision: {precision:.4f}")  # fixed label typo: was "Precison"
recall = recall_score(y_test, y_test_pred_binary)
print(f"Recall: {recall:.4f}")
F1 Score: 0.7126 Precison: 0.7098 Recall: 0.7155
[44]:
# Rows = true class (0=REAL, 1=FAKE), columns = predicted class.
conf_matrix = confusion_matrix(y_test, y_test_pred_binary)
print("Confusion Matrix:")
print(conf_matrix)
Confusion Matrix: [[1415 585] [ 569 1431]]
[45]:
# NOTE(review): seaborn is already imported at the top of the notebook.
import seaborn as sns
# Heatmap of the ResNet50 confusion matrix.
sns.heatmap(conf_matrix, annot=True)
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
# Fixed: the title said "custom model" but this cell plots the ResNet50
# transfer-learning model's confusion matrix.
plt.title('Confusion Matrix of ResNet50 model')
plt.tight_layout()
plt.show()
[46]:
# Per-class precision/recall/F1 summary for the ResNet50 model.
class_report = classification_report(y_test, y_test_pred_binary)
print("Classification Report:")
print(class_report)
Classification Report:
precision recall f1-score support
0 0.71 0.71 0.71 2000
1 0.71 0.72 0.71 2000
accuracy 0.71 4000
macro avg 0.71 0.71 0.71 4000
weighted avg 0.71 0.71 0.71 4000
[47]:
import matplotlib.pyplot as plt

# Unpack training history
hist = history.history

# 2x2 grid, one panel per tracked metric (train vs. validation curves).
# Refactored: the four subplot sections were copy-pasted; a loop produces
# the identical figure with the identical labels and titles.
metrics = ["accuracy", "loss", "precision", "recall"]
plt.figure(figsize=(14, 10))
for idx, metric in enumerate(metrics, start=1):
    plt.subplot(2, 2, idx)
    label = metric.capitalize()  # "accuracy" -> "Accuracy", etc.
    plt.plot(hist[metric], label=f"Train {label}")
    plt.plot(hist[f"val_{metric}"], label=f"Val {label}")
    plt.title(label)
    plt.xlabel("Epoch")
    plt.ylabel(label)
    plt.legend()
plt.tight_layout()
plt.show()
[48]:
# Fixed: the original called model.save(...), which would have written the
# *custom* CNN under the resnet50 filename. Save the ResNet50 model instead.
model_resnet50.save("resnet50_model_ep10.keras")
[65]:
import cv2
import numpy as np
from lime import lime_image
from skimage.segmentation import mark_boundaries
import matplotlib.pyplot as plt
[66]:
Selection deleted
# Create LIME explainer
lime_explainer = lime_image.LimeImageExplainer()

def preprocess_frame(frame):
    # Resize a frame to the model's input size and scale pixels to [0, 1].
    # NOTE(review): IMG_SIZE is defined in a *later* cell — this only works
    # because that cell executes before this function is first called.
    frame = cv2.resize(frame, (IMG_SIZE, IMG_SIZE))
    frame = frame.astype("float32") / 255.0
    return frame
def explain_frame(frame):
    # frame: (224,224,3) in [0-255]
    # Generate and display a LIME superpixel explanation for one frame.
    def predict_fn(images):
        # LIME passes batches of perturbed copies in [0-255]; rescale first.
        # NOTE(review): the training cells fed raw 0-255 arrays to model.fit,
        # so this /255 normalisation may not match training — confirm.
        images = np.array(images) / 255.0 # normalize
        return model.predict(images)
    explanation = lime_explainer.explain_instance(
        frame,
        predict_fn,
        top_labels=1,       # explain only the model's top class
        hide_color=0,       # masked superpixels are blacked out
        num_samples=1000    # number of perturbed samples LIME evaluates
    )
    top_label = explanation.top_labels[0]
    temp, mask = explanation.get_image_and_mask(
        top_label,
        positive_only=False,  # show evidence both for and against the class
        num_features=10,      # highlight the 10 most influential superpixels
        hide_rest=False
    )
    # mark_boundaries expects floats in [0, 1]; temp is still [0-255].
    plt.imshow(mark_boundaries(temp / 255.0, mask))
    plt.axis("off")
    plt.show()
[67]:
Selection deleted
def predict_video(video_path, show_explanations=False):
    # Classify a video as FAKE/REAL by averaging the model's frame-level
    # sigmoid scores over the first MAX_FRAMES frames. Optionally shows a
    # LIME explanation for every 10th frame.
    # NOTE(review): depends on module-level MAX_FRAMES, model,
    # preprocess_frame and explain_frame defined in other cells.
    cap = cv2.VideoCapture(video_path)
    preds = []
    count = 0
    while cap.isOpened() and count < MAX_FRAMES:
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV decodes BGR; the model consumes RGB images.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # NOTE(review): frames are scaled to [0-1] here, but the training
        # cells fed raw 0-255 arrays to model.fit — confirm preprocessing
        # matches training.
        frame_resized = preprocess_frame(frame) # normalized [0-1]
        frame_input = np.expand_dims(frame_resized, axis=0) # shape (1,224,224,3)
        pred = model.predict(frame_input, verbose=0)[0][0]
        preds.append(pred)
        # Show LIME explanation every 10th frame
        if show_explanations and count % 10 == 0:
            print(f"Explaining frame {count}...")
            explain_frame((frame_resized * 255).astype(np.uint8)) # back to [0-255]
        count += 1
    cap.release()
    # An unreadable/empty video degrades to score 0.0 -> "REAL".
    avg_score = np.mean(preds) if preds else 0.0
    return ("FAKE" if avg_score > 0.5 else "REAL"), avg_score
[68]:
# --- Parameters ---
video_path = "/kaggle/input/deepfake-detection-challenge/train_sample_videos/abqwwspghj.mp4"
IMG_SIZE = 224
MAX_FRAMES = 30
[69]:
# --- Run prediction ---
label, score = predict_video(video_path, show_explanations=True)
print(f"Prediction: {label} (confidence: {score:.4f})")
Explaining frame 0...
100%
1000/1000 [00:12<00:00, 79.13it/s]
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 57ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 57ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 58ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 57ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 57ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 57ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 
━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 60ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 57ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 58ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 57ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step
Explaining frame 10...
100%
1000/1000 [00:12<00:00, 78.47it/s]
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 58ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 59ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 61ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 
━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 57ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step
Explaining frame 20...
100%
1000/1000 [00:12<00:00, 79.84it/s]
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 57ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 
━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 57ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 56ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 53ms/step 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 54ms/step
Prediction: FAKE (confidence: 0.5130)
[70]:
Selection deleted
# Persist the trained network to disk in legacy HDF5 format.
# The filename is reused verbatim by the download-link cell below — keep them in sync.
# NOTE(review): modern Keras prefers the native ".keras" format; ".h5" is
# presumably kept here for portability/download convenience — confirm.
model.save("deepfake_resnet50.h5")
[72]:
from IPython.display import FileLink

# Expose the saved model file as a clickable download link in the notebook
# output (the bare last expression is rendered via its rich HTML repr).
# NOTE(review): "/kaggle/working" is the Kaggle writable working directory —
# this link only resolves when the notebook runs on Kaggle.
FileLink("/kaggle/working/deepfake_resnet50.h5")
play_arrow
Notebook cell shifted up successfully
Common Tools
No metadata.
Advanced Tools
No metadata.